In [2]:
%pylab inline
In [19]:
import nengo
from nengo.utils.ensemble import tuning_curves
model = nengo.Network(label='Neurons')
with model:
    neurons = nengo.Ensemble(50, dimensions=1)
    connection = nengo.Connection(neurons, neurons)  # This is just to generate the decoders
sim = nengo.Simulator(model)
d = sim.data[connection].weights.T
x, A = tuning_curves(neurons, sim)
# The factor of 1 is a placeholder: scale both the decoders and the ideal
# (e.g. by 10) to try decoding f(x)=10x, as asked below.
xhat = numpy.dot(A, 1*d)
x = 1*x
figure(figsize=(4,2))
plot(x, A)
xlabel('x')
ylabel('firing rate (Hz)')
figure()
plot(x, x)
plot(x, xhat)
xlabel('$x$')
ylabel(r'$\hat{x}$')
print('RMSE', np.sqrt(np.average((x - xhat)**2)))
What about $f(x)=10x$? Or larger numbers?
How about $f(x)=x^2$?
In [29]:
exponent = 2
with model:
    power_conn = nengo.Connection(neurons, neurons, function=lambda x: x**exponent)
sim = nengo.Simulator(model)
d = sim.data[power_conn].weights.T
x, A = tuning_curves(neurons, sim)
xhat = numpy.dot(A, d)
ideal = x**exponent
figure(figsize=(4,2))
plot(x, A)
xlabel('x')
ylabel('firing rate (Hz)')
figure()
plot(x, ideal)
plot(x, xhat)
xlabel('$x$')
ylabel(r'$\hat{x}$')
print('RMSE', np.sqrt(np.average((ideal - xhat)**2)))
Other exponents?
Polynomials?
Other functions?
In [33]:
c = .5
b = 0
with model:
    exp_conn = nengo.Connection(neurons, neurons, function=lambda x: np.exp(-(x-b)**2/(2*c**2)))
sim = nengo.Simulator(model)
d = sim.data[exp_conn].weights.T
x, A = tuning_curves(neurons, sim)
xhat = numpy.dot(A, d)
ideal = np.exp(-(x-b)**2/(2*c**2))
figure(figsize=(4,2))
plot(x, A)
xlabel('x')
ylabel('firing rate (Hz)')
figure()
plot(x, ideal)
plot(x, xhat)
xlabel('$x$')
ylabel(r'$\hat{x}$')
print('RMSE', np.sqrt(np.average((ideal - xhat)**2)))
Suppose we have a different basis, rotated by 45 degrees ($d_1, d_2$)
This way, we can move back and forth between orthonormal bases
Notice that if we substitute the encoding into the decoding, we recover the same vector we originally encoded
Thus we can think of the coefficients $a_i$ as ‘representing’, or ‘carrying the same information’, or ‘encoding’ the original coefficients $x_i$
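Written out explicitly (standard orthonormal-basis algebra), the encoding and decoding are

$$a_i = x \cdot d_i, \qquad \hat{x} = \sum_i a_i\, d_i = \sum_i (x \cdot d_i)\, d_i = x,$$

where the last equality holds because the $d_i$ are orthonormal.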
Regardless of the function we want to decode, we always invert the same $\Gamma$ matrix
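Concretely, the least-squares solution for the decoders is

$$\Gamma_{ij} = \sum_x a_i(x)\, a_j(x), \qquad \Upsilon^f_i = \sum_x a_i(x)\, f(x), \qquad d^f = \Gamma^{-1}\, \Upsilon^f,$$

so only $\Upsilon^f$ depends on the target function $f$; $\Gamma$ is built entirely from the neural activities (in the code below, Gamma = np.dot(A.T, A)).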
The vectors in $U$ are orthogonal
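Here $U$ comes from the singular value decomposition of $\Gamma$; since $\Gamma$ is symmetric, this coincides with its eigendecomposition:

$$\Gamma = U S V^T, \qquad V = U.$$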
Consider the neural activity for a particular $x$ value
In general, we can think of the magnitude of the singular value as telling us how relevant that $U$ vector is to the identity of the matrix we have decomposed
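Rotating the activities into this basis gives the functions of $x$ that the population most strongly supports:

$$\chi_i(x) = \sum_j a_j(x)\, U_{ji},$$

which is the chi = np.dot(A, U) computed below; the singular values $S_i$ order these basis functions from most to least significant.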
So what?
In [13]:
import nengo
from nengo.utils.ensemble import tuning_curves
model = nengo.Network(label='Neurons')
with model:
    neurons = nengo.Ensemble(1000, dimensions=1)
    connection = nengo.Connection(neurons, neurons)  # This is just to generate the decoders
sim = nengo.Simulator(model)
d = sim.data[connection].weights.T
x, A = tuning_curves(neurons, sim)
xhat = numpy.dot(A, d)
Gamma = np.dot(A.T, A)
U,S,V = np.linalg.svd(Gamma)
chi = np.dot(A, U)
for i in range(5):
    plot(x, chi[:,i], label=r'$\chi_%d$=%1.3g' % (i, S[i]), linewidth=3)
legend(loc='best', bbox_to_anchor=(1,1))
show()
In [14]:
subplot(1,2,1)
for i in range(5):
    plot(x, chi[:,i], linewidth=3)
subplot(1,2,2)
for i in range(5):
    plot(x, numpy.polynomial.legendre.legval(x, numpy.eye(10)[i]))
ylim(-1.2, 1.2)
show()
In [15]:
plot(S)
figure()
loglog(S)
In [16]:
import nengo
from nengo.utils.ensemble import tuning_curves
from nengo.dists import Uniform
model = nengo.Network(label='Neurons')
with model:
    neurons = nengo.Ensemble(1000, dimensions=1, intercepts=Uniform(-.3,.3))
    connection = nengo.Connection(neurons, neurons)  # This is just to generate the decoders
sim = nengo.Simulator(model)
d = sim.data[connection].weights.T
x, A = tuning_curves(neurons, sim)
xhat = numpy.dot(A, d)
Gamma = np.dot(A.T, A)
U,S1,V = np.linalg.svd(Gamma)
chi = np.dot(A, U)
figure()
plot(x,A)
figure()
for i in range(5):
    plot(x, chi[:,i], label=r'$\chi_%d$=%1.3g' % (i, S1[i]), linewidth=3)
legend(loc='best', bbox_to_anchor=(1,1))
show()
In [8]:
import nengo
from nengo.utils.ensemble import tuning_curves
from nengo.dists import Uniform
model = nengo.Network(label='Neurons')
with model:
    neurons = nengo.Ensemble(1000, dimensions=2, intercepts=Uniform(-.3,.3))
    connection = nengo.Connection(neurons, neurons)  # This is just to generate the decoders
sim = nengo.Simulator(model)
d = sim.data[connection].weights.T
theta = np.arange(0, 2*np.pi, .1)
x = np.array([np.sin(theta), np.cos(theta)]).T   # evaluation points on the unit circle
A = neurons.neuron_type.rates(np.dot(x, sim.data[neurons].encoders.T),
                              sim.data[neurons].gain, sim.data[neurons].bias)
Gamma = np.dot(A.T, A)
U,S2,V = np.linalg.svd(Gamma)
chi = np.dot(A, U)
figure()
plot(theta, A)
figure()
for i in range(5):
    plot(theta, chi[:,i], label=r'$\chi_%d$=%1.3g' % (i, S2[i]), linewidth=3)
legend(loc='best', bbox_to_anchor=(1,1));
In [9]:
loglog(S2)   # 2D ensemble
loglog(S)    # 1D ensemble, for comparison
You can see that the singular values now come more in pairs and are somewhat larger. Most importantly, the functions being combined (i.e., the basis functions) are quite different.
In [10]:
import nengo
from nengo.utils.ensemble import tuning_curves
from nengo.dists import Uniform
N = 500
model = nengo.Network(label='Neurons', seed=2)
with model:
    neurons = nengo.Ensemble(N, dimensions=2)
    connection = nengo.Connection(neurons, neurons)  # This is just to generate the decoders
sim = nengo.Simulator(model)
d = sim.data[connection].weights.T
x, A = tuning_curves(neurons, sim)
A = np.reshape(A, (2500, N))   # flatten the 50x50 grid of evaluation points
Gamma = np.dot(A.T, A)
U,S,V = np.linalg.svd(Gamma)
chi = np.dot(A, U)
from mpl_toolkits.mplot3d.axes3d import Axes3D   # registers the '3d' projection
for index in [1,3,5]:  # What's 0? 3,4,5 (same/diff signs, cross)? Higher?
    basis = chi[:,index].reshape(50, 50)   # put the basis function back on the 50x50 grid
    x0 = numpy.linspace(-1, 1, 50)
    x1 = numpy.linspace(-1, 1, 50)
    x0, x1 = numpy.meshgrid(x0, x1)
    fig = pylab.figure()
    ax = fig.add_subplot(1, 1, 1, projection='3d')
    p = ax.plot_surface(x0, x1, basis, linewidth=0, cstride=1, rstride=1, cmap=pylab.cm.jet)
    pylab.title('Basis #%d (sv=%1.4g)' % (index, S[index]))
In higher dimensions, higher-order functions are even harder to decode accurately.
This is all also affected by the distribution of encoders (as well as the distributions of intercepts and maximum firing rates); a rough sketch of how to explore that is below.
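As a rough sketch of one way to explore the effect of the encoder distribution, assuming the same %pylab/nengo setup as the cells above; the helper basis_spectrum and the axis-aligned Choice() encoders are illustrative choices, not from the original:
In [ ]:
# Sketch: compare singular-value spectra of Gamma for two encoder distributions
# in a 2D ensemble. The specific encoder choices below are illustrative.
import nengo
import numpy as np
from nengo.dists import Choice, UniformHypersphere
from nengo.utils.ensemble import tuning_curves

def basis_spectrum(encoders, n_neurons=500, seed=2):
    # Build a 2D ensemble with the given encoder distribution and return the
    # singular values of its Gamma matrix.
    with nengo.Network(seed=seed) as net:
        ens = nengo.Ensemble(n_neurons, dimensions=2, encoders=encoders)
    sim = nengo.Simulator(net)
    _, A = tuning_curves(ens, sim)
    A = A.reshape(-1, n_neurons)      # flatten the grid of evaluation points
    Gamma = np.dot(A.T, A)
    _, S, _ = np.linalg.svd(Gamma)
    return S

S_spread = basis_spectrum(UniformHypersphere(surface=True))            # encoders spread around the circle
S_axes = basis_spectrum(Choice([[1, 0], [-1, 0], [0, 1], [0, -1]]))    # axis-aligned encoders only
loglog(S_spread)
loglog(S_axes)
With only axis-aligned encoders, each neuron's activity depends on a single coordinate, so you would expect cross terms such as $x_0 x_1$ to be poorly supported and the spectrum to fall off faster; the comparison above is one way to check that.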
In [ ]: